01-15-2011, 11:43 PM   #3
guterm (Junior Member)
Join Date: Jan 2011
Device: Sony PRS-650
And here is an even more polished recipe.
Failing downloads are fixed, articles are fetched straight from the mobile site (avoiding a redirect), plus other minor tweaks.


#!/usr/bin/env python
__license__   = 'GPL v3'

__copyright__ = '2011, Szing, guterm'
__docformat__ = 'restructuredtext en'


from calibre.web.feeds.news import BasicNewsRecipe

class TheGlobeAndMailAdvancedRecipe(BasicNewsRecipe):
    title          = u'The Globe And Mail'
    __license__   = 'GPL v3'
    __author__ = 'Szing, guterm'
    oldest_article = 2
    no_stylesheets = True
    max_articles_per_feed = 100
    encoding               = 'utf8'
    publisher              = 'Globe & Mail'
    language               = 'en_CA'
    extra_css = 'p.meta {font-size:75%}\n .redtext {color: red;}\n .byline {font-size: 70%}'

    feeds          = [
      (u'Top National Stories', u''),
      (u'Business', u''),
      (u'Investing', u''),
      (u'Politics', u''),
      (u'Commentary', u''),
      (u'Toronto', u''),
      (u'Facts & Arguments', u''),
      (u'Technology', u''),
      (u'Arts', u''),
      (u'Life', u''),
      (u'Real Estate', u''),
      (u'Auto', u''),
      (u'Sports', u'')
    ]

    keep_only_tags = [
        dict(name='h2', attrs={'id':'articletitle'}),
        dict(name='p', attrs={'class':['leadText', 'meta', 'leadImage', 'redtext byline', 'bodyText']}),
        dict(name='div', attrs={'class':['news','articlemeta','articlecopy','columnist', 'blog']}),
        # single-page article container, also used in postprocess_html below
        dict(name='div', attrs={'id':'article'}),
        dict(name='table', attrs={'class':'todays-market'}),
        dict(name='header', attrs={'id':'leadheader'})
    ]

    remove_tags = [
        dict(name='ul', attrs={'class':['pillboxcontainer arttoolsbpbx']}),
        dict(name='div', attrs={'class':['relcont', 'articleTools', 'ypad fontsmall', 'pagination']}),
        dict(name='a', attrs={'href':['javascript:void(0);', '']}),
        dict(name='div', attrs={'id':['ShareArticles', 'topStories', 'seealsobottom']})
    ]

    def postprocess_html(self, soup, first_fetch):
        # Find and preserve the single-page article layout; when two
        # 'article' containers are present it can be the first or the last.
        allArts = soup.findAll(True, {'id':'article'})
        if len(allArts) == 2:
            # Keep whichever container holds the full article text and
            # drop the other (near-empty) duplicate.
            if len(allArts[0].contents) >= len(allArts[1].contents):
                allArts[1].extract()
            else:
                allArts[0].extract()
        return soup

    def parse_feeds(self, *args, **kwargs):
        parsed_feeds = BasicNewsRecipe.parse_feeds(self, *args, **kwargs)
        # Eliminate articles duplicated across feeds, keeping the first occurrence
        urlSet = set()
        for feed in parsed_feeds:
            newArticles = []
            for article in feed:
                if article.url not in urlSet:
                    urlSet.add(article.url)
                    newArticles.append(article)
            feed.articles = newArticles
        return parsed_feeds

    cover_url = ''

    # Use the mobile version rather than the web version
    def print_version(self, url):
        return url.replace('cmpid=rss1', 'service=mobile').replace('http://www.', 'http://m.')
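If you want to try it without going through the GUI, I believe you can also feed the saved recipe straight to ebook-convert from the command line (the file name globe_and_mail.recipe below is just whatever you saved it as; --test fetches only a couple of articles per feed so it finishes quickly):

ebook-convert globe_and_mail.recipe globe_and_mail.epub --test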