Quote:
Originally Posted by macpablus:
My goal is to generate a new feed containing only the comic strip from...

http://www.pagina12.com.ar/diario/ultimas/index.html
Hi macpablus, I found time to have a look at your recipe.

First of all, I saw that the daily comic is located at http://www.pagina12.com.ar/diario/principal/index.html.

All I had to do was add this page as a single feed 'Humor' with a single article. Then I modified postprocess_html: I try to find a div with id='rudy_paz'. When this div is present, I extract it from the soup, remove all remaining content from the soup's body, insert the image div again, and return the soup.

Spoiler:
Code:
        # Try to find the div containing the comic image
        image = soup.find('div', attrs={'id': 'rudy_paz'})
        if image:
            # If found: extract the div, clear the body and re-insert the image. Finished.
            image.extract()
            while len(soup.body) > 0:          # remove everything else from the body
                soup.body.next.extract()
            soup.body.insert(0, image)
            return soup


The remove_tags_before option did not work as I expected, so I removed it.
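
I have a guess, though I did not verify it against the calibre sources: remove_tags_after accepts a list of dictionaries, but remove_tags_before may only accept a single dictionary. If that is the case, this should work:

Code:
    # Assumption: remove_tags_before takes a single tag description (a dict),
    # not a list of dicts like remove_tags_after
    remove_tags_before = dict(id='fecha')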

The complete recipe is here:

Spoiler:
Code:
#!/usr/bin/env  python

__license__   = 'GPL v3'
__copyright__ = '2011, Pablo Marfill'
'''
Calibre recipe to convert the news site pagina12.com.ar to an ebook
'''
import re

from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag, NavigableString

class Pagina12(BasicNewsRecipe):

    title       = 'Pagina/12 - Edicion Impresa'
    __author__  = 'Pablo Marfil, a.peter'
    description = 'Diario argentino'
    INDEX       = 'http://www.pagina12.com.ar/diario/secciones/index.html'
    language    = 'es'
    encoding    = 'cp1252'
    #remove_tags_before = [dict(id='fecha')]  # Did not work; possibly it expects a single dict instead of a list
    remove_tags_after = [dict(id='fin')]      # A list of dictionaries
    remove_tags       = [dict(id=['volver', 'logo', 'fecha', 'fin', 'pageControls', 'logo_suple', 'fecha_suple'])]
    masthead_url      = 'http://www.pagina12.com.ar/commons/imgs/logo-home.gif'
    no_stylesheets    = True

    preprocess_regexps= [(re.compile(r'<!DOCTYPE[^>]+>', re.I), lambda m:'')]

    def get_cover_url(self):
        # The cover is the image whose alt text starts with 'Tapa de la fecha'
        soup = self.index_to_soup('http://www.pagina12.com.ar/diario/principal/diario/index.html')
        for image in soup.findAll('img', alt=True):
            if image['alt'].startswith('Tapa de la fecha'):
                return image['src']
        return None


    def parse_index(self):
        raw = self.index_to_soup('http://www.pagina12.com.ar/diario/secciones/index.html', raw=True)
        raw = re.sub(r'(?i)<!DOCTYPE[^>]+>', '', raw)
        soup = self.index_to_soup(raw)

        feeds = []
        # Add the page containing the daily comic as a single-article feed
        feeds.append(('Humor', [{'title': 'Rudy y Daniel Paz',
                                 'url': 'http://www.pagina12.com.ar/diario/principal/index.html',
                                 'description': 'Daily comic',
                                 'date': ''}]))

        seen_titles = set()
        for section in soup.findAll('div', 'seccionx'):
            section_title = self.tag_to_string(section.find('div', 'desplegable_titulo on_principal right'))
            self.log('Found section:', section_title)
            articles = []
            for post in section.findAll('h2'):
                a = post.find('a', href=True)
                if a is None:
                    continue
                title = self.tag_to_string(a)
                if title in seen_titles:
                    continue
                seen_titles.add(title)
                url = a['href']
                if url.startswith('/'):
                    url = 'http://pagina12.com.ar/imprimir' + url
                # Assuming the article summary sits in a div of class 'h2';
                # the original attrs={'h2'} (a set) was probably a typo
                p = post.find('div', attrs={'class': 'h2'})
                desc = None
                self.log('\tFound article:', title, 'at', url)
                if p is not None:
                    desc = self.tag_to_string(p)
                    self.log('\t\t', desc)
                articles.append({'title': title, 'url': url, 'description': desc, 'date': ''})
            if articles:
                feeds.append((section_title, articles))
        return feeds


    def postprocess_html(self, soup, first):
        # Added by a.peter:
        # Try to find the div containing the comic image
        image = soup.find('div', attrs={'id': 'rudy_paz'})
        if image:
            # If found: extract the div, clear the body and re-insert the image. Finished.
            image.extract()
            while len(soup.body) > 0:          # remove everything else from the body
                soup.body.next.extract()
            soup.body.insert(0, image)
            return soup

        # Replace right-aligned image tables by centered divs with the caption below the image
        for table in soup.findAll('table', align='right'):
            img = table.find('img')
            if img is not None:
                img.extract()
                caption = self.tag_to_string(table).strip()
                div = Tag(soup, 'div')
                div['style'] = 'text-align:center'
                div.insert(0, img)
                div.insert(1, Tag(soup, 'br'))
                if caption:
                    div.insert(2, NavigableString(caption))
                table.replaceWith(div)

        return soup


Run in debug mode (only two feeds with two articles each), the recipe produced the following output:

Pagina12.epub
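
In case it is useful: this is how I run a recipe in debug mode from the command line. The --test switch limits the download to two feeds with two articles each; the recipe file name is just what I use locally:

Code:
    ebook-convert pagina12.recipe Pagina12.epub --test -vv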

I put your name into the copyright and added myself as co-author.