03-02-2010, 02:07 PM   #1
oddeyed
hi there!
oddeyed began at the beginning.
 
Posts: 17
Karma: 10
Join Date: Feb 2010
Location: London, UK
Device: Kindle 3rd Generation - 3G + Wifi - Graphite
Delete News Sections

Hi everyone,

So when I do eventually take the plunge and get a reader, I want to be able to access news feeds on it.

I would use the Feedbooks self-updating automagical newspapers, but since my soon-to-be reader, even if it is a Kindle, won't have that functionality (I'm not in the US), I thought I'd use Calibre's news output instead, as it can include pictures.

I have been doing test builds with the Guardian feeds on my laptop, and without fail they include the Sport section, G2 and Entertainment, which I don't want, even though I have edited the recipe to exclude them (see the URL filter in get_article_url in the recipe below).

So, does anyone know how to stop this from happening?

Thanks,
oddeyed

Below is my custom recipe:
Spoiler:
Code:
#!/usr/bin/env python
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

'''
www.guardian.co.uk
'''
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class Guardian(BasicNewsRecipe):

    title = u'The Guardian - Top Stories'
    language = 'en_GB'

    oldest_article = 2
    max_articles_per_feed = 5
    remove_javascript = True

    timefmt = ' [%a, %d %b %Y]'
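    # keep only the main article containers; strip video boxes, toolbars and pagination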
    keep_only_tags = [
        dict(name='div', attrs={'id':['content', 'article_header', 'main-article-info']}),
    ]
    remove_tags = [
        dict(name='div', attrs={'class':['video-content', 'videos-third-column']}),
        dict(name='div', attrs={'id':['article-toolbox', 'subscribe-feeds']}),
        dict(name='ul', attrs={'class':['pagination']}),
        dict(name='ul', attrs={'id':['content-actions']}),
    ]
    use_embedded_content = False

    no_stylesheets = True
    extra_css = '''
                    .article-attributes{font-size: x-small; font-family:Arial,Helvetica,sans-serif;}
                    .h1{font-size: large ;font-family:georgia,serif; font-weight:bold;}
                    .stand-first-alone{color:#666666; font-size:small; font-family:Arial,Helvetica,sans-serif;}
                    .caption{color:#666666; font-size:x-small; font-family:Arial,Helvetica,sans-serif;}
                    #article-wrapper{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                    .main-article-info{font-family:Arial,Helvetica,sans-serif;}
                    #full-contents{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                    #match-stats-summary{font-size:small; font-family:Arial,Helvetica,sans-serif;font-weight:normal;}
                '''

    feeds = [
        ('Top Stories', 'http://www.guardian.co.uk/theguardian/mainsection/topstories/rss'),
    ]
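    # NB: if parse_index() below succeeds, I believe calibre ignores this feeds
    # list and only falls back to it when parse_index() fails.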

    def get_article_url(self, article):
        # filter out multimedia pages and the sections I don't want
        url = article.get('guid', None)
        if url is None:
            return None
        unwanted = ('/video/', '/flyer/', '/quiz/', '/gallery/',
                    'ivebeenthere', 'pickthescore', 'audioslideshow',
                    '/sport', 'educationguardian', 'football', '/films')
        if any(s in url for s in unwanted):
            return None
        return url
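    # As far as I can tell, get_article_url() only filters articles parsed from
    # the RSS feeds, so it may not apply to articles collected by parse_index().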

    def preprocess_html(self, soup):
        # strip inline styles and font faces; flatten lists into divs
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll(face=True):
            del item['face']
        for tag in soup.findAll(name=['ul', 'li']):
            tag.name = 'div'
        return soup

    def find_sections(self):
        soup = self.index_to_soup('http://www.guardian.co.uk/theguardian')
        # find cover pic
        img = soup.find('img', attrs={'alt':'Guardian digital edition'})
        if img is not None:
            self.cover_url = img['src']
        # end find cover pic

        idx = soup.find('div', id='book-index')
        # note: this yields every section listed on the paper's index page
        for s in idx.findAll('strong', attrs={'class':'book'}):
            a = s.find('a', href=True)
            yield (self.tag_to_string(a), a['href'])

    def find_articles(self, url):
        soup = self.index_to_soup(url)
        div = soup.find('div', attrs={'class':'book-index'})
        for ul in div.findAll('ul', attrs={'class':'trailblock'}):
            for li in ul.findAll('li'):
                a = li.find(href=True)
                if not a:
                    continue
                title = self.tag_to_string(a)
                url = a['href']
                if not title or not url:
                    continue
                desc = ''
                tt = li.find('div', attrs={'class':'trailtext'})
                if tt is not None:
                    # drop embedded links, keep only the plain trail text
                    for da in tt.findAll('a'):
                        da.extract()
                    desc = self.tag_to_string(tt).strip()
                yield {
                        'title': title, 'url':url, 'description':desc,
                        'date' : strftime('%a, %d %b'),
                        }

    def parse_index(self):
        try:
            feeds = []
            for title, href in self.find_sections():
                feeds.append((title, list(self.find_articles(href))))
            return feeds
        except Exception:
            # raising NotImplementedError makes calibre fall back to the
            # RSS feeds list defined above
            raise NotImplementedError
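
For what it's worth, below is the kind of section filter I would guess is needed. This is only a sketch: the section titles ('Sport', 'G2', 'Entertainment') are my guesses at what find_sections() actually yields from the index page, so they may not match exactly, and I've left out the cover-art code for brevity.

Code:
    def find_sections(self):
        # sketch: skip sections by the title shown on the paper's index page
        skip = ('Sport', 'G2', 'Entertainment')  # guessed names, unverified
        soup = self.index_to_soup('http://www.guardian.co.uk/theguardian')
        idx = soup.find('div', id='book-index')
        for s in idx.findAll('strong', attrs={'class':'book'}):
            a = s.find('a', href=True)
            title = self.tag_to_string(a)
            if title in skip:
                continue
            yield (title, a['href'])
Is filtering in find_sections() roughly the right approach, or is there a cleaner way to drop whole sections?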