03-21-2011, 08:29 PM   #5
partymonkey
Thanks for your help.

I've finally been able to get this recipe to do what I wanted. My apologies if I'm violating Python/calibre good practices, but I figured I'd share it in case someone else has a digital or print subscription to The Economist and wants to download older issues.
In the username field, enter a date in the format YYYYMMDD, and in the password field just enter a single character. The date must match the issue date of an actual Economist issue. (Also, I believe you need to be logged into your Economist account in a browser first, so that a cookie gets created and remembers you, but I'm not 100% sure that's how it works.)
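
For example, for the issue dated March 19, 2011, you'd enter 20110319 as the username. A rough sketch of what the recipe does with that string (the actual slicing is in get_cover_url below):

Code:
# Standalone illustration (not part of the recipe) of how the YYYYMMDD
# "username" is split into year / month / day to build the cover image URL.
issuedate = '20110319'       # 19 March 2011
iyr = issuedate[0:4]         # '2011'
imo = issuedate[4:6]         # '03'
idt = issuedate[6:]          # '19'
print iyr, imo, idt
# Resulting cover URL:
# http://www.economist.com/images/images-magazine/2011/03/19/CN/20110319_CNA400.jpg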

In any case, QUESTION: is there a way to set the Publish Date on the recipe to the actual issue date?

Anyhow, here it is in case someone wants it.

Cheers.
==================

Code:
#!/usr/bin/env python

__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
economist.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.BeautifulSoup import Tag, NavigableString

import string, time, re

class Economist(BasicNewsRecipe):

    title = 'The Economist (old issues)' 
    language = 'en'

    __author__ = "Kovid Goyal"
    INDEX = 'http://www.economist.com/printedition/index.cfm?d='
    description = 'Global news and current affairs from a European perspective.'

    oldest_article = 7.0
    economist_cover_url = 'http://www.economist.com/images/images-magazine/'
    cover_url = None
    cover_suffix = '_CNA400.jpg'
    remove_tags = [
            dict(name=['script', 'noscript', 'title', 'iframe', 'cf_floatingcontent']),
            dict(attrs={'class':['dblClkTrk', 'ec-article-info', 'share_inline_header']}),
            {'class': lambda x: x and 'share-links-header' in x},
    ]
    keep_only_tags = [dict(id='ec-article-body')]
    needs_subscription = True
    no_stylesheets = True
    preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
        lambda x:'</html>')]

    # The login code below is intentionally disabled: this recipe relies on
    # an existing Economist cookie in your browser session instead. If you
    # re-enable it, add "import mechanize, urllib" at the top of the file.
    '''
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open('http://www.economist.com')
        req = mechanize.Request(
                'http://www.economist.com/members/members.cfm?act=exec_login',
                headers = {
                    'Referer':'http://www.economist.com/',
                    },
                data=urllib.urlencode({
                    'logging_in' : 'Y',
                    'returnURL'  : '/',
                    'email_address': self.username,
                    'fakepword' : 'Password',
                    'pword'     : self.password,
                    'x'         : '0',
                    'y'         : '0',
                    }))
        br.open(req).read()
        return br
    '''

    def get_cover_url(self):
        # The username field holds the issue date as YYYYMMDD.
        issuedate = self.username
        self.log.info('Issuedate is: ' + issuedate)
        iyr = issuedate[0:4]
        imo = issuedate[4:6]
        idt = issuedate[6:]
        self.cover_url = self.economist_cover_url + iyr + '/' + imo + '/' + idt + '/CN/' + issuedate + self.cover_suffix
        self.log.info('Cover url is: ' + self.cover_url)
        return self.cover_url

    def get_masthead_title(self):
        issuedate = self.username
        iyr = issuedate[0:4]
        imo = issuedate[4:6]
        idt = issuedate[6:]
        self.title = 'The Economist (' + iyr + '/' + imo + '/' + idt + ')'
        return self.title

    def parse_index(self):
        try:
            return self.economist_parse_index()
        except Exception:
            # If the first attempt fails, wait a bit and retry once.
            self.log.warn(
                'Initial attempt to parse index failed, retrying in 30 seconds')
            time.sleep(30)
            return self.economist_parse_index()

    def economist_parse_index(self):
        # Build the index URL locally instead of mutating self.INDEX, so a
        # retry from parse_index does not append the date a second time.
        index_url = self.INDEX + self.username
        soup = BeautifulSoup(self.browser.open(index_url).read(),
                             convertEntities=BeautifulSoup.HTML_ENTITIES)
        index_started = False
        feeds = {}
        ans = []
        key = None
        for tag in soup.findAll(['h1', 'h2']):
            text = ''.join(tag.findAll(text=True))
            if tag.name in ('h1', 'h2') and 'Classified ads' in text:
                break
            if tag.name == 'h1':
                if 'The world this week' in text or 'The world this year' in text:
                    index_started = True
                if not index_started:
                    continue
                text = string.capwords(text)
                if text not in feeds:
                    feeds[text] = []
                if text not in ans:
                    ans.append(text)
                key = text
                continue
            if key is None:
                continue
            a = tag.find('a', href=True)
            if a is not None:
                url = a['href']
                # Extract the story id and build an absolute print-view URL;
                # skip entries whose href carries no story_id.
                match = re.search(r'story_id=(\d+)', url)
                if match is None:
                    continue
                url = 'http://www.economist.com/node/%s/print' % match.group(1)
                try:
                    subtitle = tag.previousSibling.contents[0].contents[0]
                    text = subtitle + ': ' + text
                except Exception:
                    pass
                article = dict(title=text,
                    url=url,
                    description='', content='', date='')
                feeds[key].append(article)

        ans = [(key, feeds[key]) for key in ans if key in feeds]
        if not ans:
            raise Exception('Could not find any articles. Has your subscription expired?')
        return ans

    def eco_find_image_tables(self, soup):
        for x in soup.findAll('table', align=['right', 'center']):
            if len(x.findAll('font')) in (1,2) and len(x.findAll('img')) == 1:
                yield x

    def postprocess_html(self, soup, first):
        body = soup.find('body')
        # Iterate over a copy: deleting attributes mutates body.attrs in place.
        for name, val in list(body.attrs):
            del body[name]

        for table in list(self.eco_find_image_tables(soup)):
            caption = table.find('font')
            img = table.find('img')
            div = Tag(soup, 'div')
            div['style'] = 'text-align:left;font-size:70%'
            ns = NavigableString(self.tag_to_string(caption))
            div.insert(0, ns)
            div.insert(1, Tag(soup, 'br'))
            del img['width']
            del img['height']
            img.extract()
            div.insert(2, img)
            table.replaceWith(div)
        return soup
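
If you save this as something like old_economist.recipe (pick any file name you like), you should also be able to test it from the command line with calibre's ebook-convert, passing the issue date and dummy password the same way as in the GUI. Something along these lines (untested sketch):

Code:
ebook-convert old_economist.recipe old_economist.epub --username 20110319 --password x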
