04-24-2019, 03:20 PM   #4
lui1
Update for the Global Times

You're welcome. The problem is that the site uses <br> line breaks instead of <p> elements to separate its paragraphs. This update should improve the layout.

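If you are curious what that preprocess rule actually does, here is a small standalone sketch using only the standard library's re module. The sample HTML below is made up, but the pattern is the same one the recipe uses, so you can see a run of <br> tags collapse into a single <p>:
Code:
import re

# Two to nine consecutive <br> variants (<br>, <br/>, <br />, even </br>),
# optionally separated by whitespace or &nbsp;, are replaced with one <p>.
br_run = re.compile(
    r'(?:<(?:br(?:\s*/)?|/br\s*)>(?:\s|\xa0|&nbsp;)*){2,9}',
    re.U | re.I
)

sample = 'First paragraph.<br /><br />Second paragraph.<br><br>&nbsp;<br>Third.'
print(br_run.sub('<p>', sample))
# First paragraph.<p>Second paragraph.<p>Third.
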
Update for Global Times:
Code:
from calibre.web.feeds.news import BasicNewsRecipe
import re


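# Return a soup attrs filter that matches any tag whose class list shares at
# least one name with the given space-separated class names.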
def classes(classes):
    q = frozenset(classes.split(' '))
    return dict(
        attrs={'class': lambda x: x and frozenset(x.split()).intersection(q)}
    )


class GlobalTimes(BasicNewsRecipe):
    title = u'Global Times'
    __author__ = 'Jose Ortiz'  # lui1 at mobileread.com
    language = 'en_CN'
    oldest_article = 7
    max_articles_per_feed = 100
    no_stylesheets = True
    keep_only_tags = [classes('article-title article-source row-content')]

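    # Collapse runs of <br> tags (and any whitespace or &nbsp; between them)
    # into a single <p>, turning the br-separated text into real paragraphs.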
    preprocess_regexps = [
        (re.compile(r'(?:<(?:br(?:\s*/)?|/br\s*)>(?:\s|\xa0|&nbsp;)*){2,9}',
                    re.U | re.I),
         lambda match: '<p>')
    ]

    extra_css = '''
        :root {
            font-family: Arial, Helvetica, sans-serif;
        }

        .article-title {
            font-weight: bold;
            font-size: large;
        }

        .article-source, .row-content {
            font-size:small;
        }
        '''

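    # Build the feed list by scraping the section index pages listed below
    # instead of relying on RSS feeds.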
    def parse_index(self):
        catnames = {
            "http://www.globaltimes.cn/china/politics/": "China Politics",
            "http://www.globaltimes.cn/china/diplomacy/": "China Diplomacy",
            "http://www.globaltimes.cn/china/military/": "China Military",
            "http://www.globaltimes.cn/business/economy/": "China Economy",
            "http://www.globaltimes.cn/world/asia-pacific/": "Asia Pacific",
        }
        feeds = []

        for cat in catnames:
            articles = []
            soup = self.index_to_soup(cat)
            for a in soup.findAll(
                'a',
                attrs={
                    'href':
                    re.compile(
                        r'https?://www\.globaltimes\.cn/content/[0-9]{4,10}\.shtml'
                    )
                }
            ):
                # Typical url http://www.globaltimes.cn/content/5555555.shtml
                url = a['href'].strip()
                title = self.tag_to_string(a).strip()
                if not title:
                    continue
                myarticle = {
                    'title': title,
                    'url': url,
                    'description': '',
                    'date': ''
                }
                self.log("found '%s'" % title)
                articles.append(myarticle)
                self.log("Adding URL %s\n" % url)
            if articles:
                feeds.append((catnames[cat], articles))
        return feeds

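    # Drop <p> elements that end up empty, e.g. after the br -> p rewrite.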
    def postprocess_html(self, soup, first_fetch):
        for p in [p for p in soup('p') if len(p) == 0]:
            p.extract()
        return soup

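Outside calibre, the empty-paragraph cleanup in postprocess_html boils down to the sketch below (plain BeautifulSoup and made-up markup). The br-to-p substitution only inserts bare opening <p> tags, so the parser can end up with a few empty paragraphs, which is presumably what the extract() pass is cleaning up:
Code:
from bs4 import BeautifulSoup

soup = BeautifulSoup('<p></p><p>Kept text</p><p></p>', 'html.parser')

# Same idea as postprocess_html: drop every <p> that has no children.
for p in [p for p in soup('p') if len(p) == 0]:
    p.extract()

print(soup)  # <p>Kept text</p>

To test changes quickly, you can save the recipe to a file (say myrecipe.recipe) and run ebook-convert myrecipe.recipe .epub --test -vv; the --test switch limits the download to a couple of articles per feed.
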
Last edited by lui1; 04-24-2019 at 07:27 PM. Reason: made a few more improvements to the recipe