#1
Junior Member
Posts: 3
Karma: 10
Join Date: Nov 2011
Device: Kindle Keyboard
Request - How to exclude images from fetched news?
Hi,
I am new to Calibre and find it a very powerful piece of software. I am a regular reader of The Economist and use Calibre to download the newspaper. However, I don't want the pictures that come with the articles, as they seem to make the file very large (about 3.5 MB per issue; I assume it is the images that drive up the size). I prefer smaller files because I do not want my Kindle to fill up quickly. So I was wondering if anyone could be so kind as to tell me how to tweak the recipe code so that images are not downloaded. I would like to give it a try myself first, but I have no programming knowledge; looking at the code, I have no idea what to do, even though I took a look at the user manual's section on advanced recipe editing. I have pasted Calibre's built-in recipe for The Economist below. If someone could help me with this, I'd very much appreciate it. Thanks!

--------------------------

#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
economist.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.BeautifulSoup import Tag, NavigableString
import string, time, re

class Economist(BasicNewsRecipe):

    title = 'The Economist'
    language = 'en'

    __author__ = "Kovid Goyal"
    INDEX = 'http://www.economist.com/printedition'
    description = ('Global news and current affairs from a European'
            ' perspective. Best downloaded on Friday mornings (GMT)')
    extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
    oldest_article = 7.0
    cover_url = 'http://www.economist.com/images/covers/currentcoverus_large.jpg'
    remove_tags = [
            dict(name=['script', 'noscript', 'title', 'iframe',
                'cf_floatingcontent']),
            dict(attrs={'class':['dblClkTrk', 'ec-article-info',
                'share_inline_header']}),
            {'class': lambda x: x and 'share-links-header' in x},
    ]
    keep_only_tags = [dict(id='ec-article-body')]
    needs_subscription = False
    no_stylesheets = True
    preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
        lambda x:'</html>')]

    '''
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open('http://www.economist.com')
        req = mechanize.Request(
                'http://www.economist.com/members/members.cfm?act=exec_login',
                headers = {
                    'Referer':'http://www.economist.com/',
                },
                data=urllib.urlencode({
                    'logging_in' : 'Y',
                    'returnURL' : '/',
                    'email_address': self.username,
                    'fakepword' : 'Password',
                    'pword' : self.password,
                    'x' : '0',
                    'y' : '0',
                }))
        br.open(req).read()
        return br
    '''

    def parse_index(self):
        try:
            return self.economist_parse_index()
        except:
            raise
            self.log.warn(
                'Initial attempt to parse index failed, retrying in 30 seconds')
            time.sleep(30)
            return self.economist_parse_index()

    def economist_parse_index(self):
        soup = BeautifulSoup(self.browser.open(self.INDEX).read(),
                convertEntities=BeautifulSoup.HTML_ENTITIES)
        index_started = False
        feeds = {}
        ans = []
        key = None
        for tag in soup.findAll(['h1', 'h2']):
            text = ''.join(tag.findAll(text=True))
            if tag.name in ('h1', 'h2') and 'Classified ads' in text:
                break
            if tag.name == 'h1':
                if 'The world this week' in text or 'The world this year' in text:
                    index_started = True
                if not index_started:
                    continue
                text = string.capwords(text)
                if text not in feeds.keys():
                    feeds[text] = []
                if text not in ans:
                    ans.append(text)
                key = text
                continue
            if key is None:
                continue
            a = tag.find('a', href=True)
            if a is not None:
                url = a['href']
                id_ = re.search(r'story_id=(\d+)', url).group(1)
                url = 'http://www.economist.com/node/%s/print'%id_
                if url.startswith('Printer'):
                    url = '/'+url
                if url.startswith('/'):
                    url = 'http://www.economist.com' + url
                try:
                    subtitle = tag.previousSibling.contents[0].contents[0]
                    text = subtitle + ': ' + text
                except:
                    pass
                article = dict(title=text, url=url, description='',
                        content='', date='')
                feeds[key].append(article)

        ans = [(key, feeds[key]) for key in ans if feeds.has_key(key)]
        if not ans:
            raise Exception('Could not find any articles. Has your subscription expired?')
        return ans

    def eco_find_image_tables(self, soup):
        for x in soup.findAll('table', align=['right', 'center']):
            if len(x.findAll('font')) in (1,2) and len(x.findAll('img')) == 1:
                yield x

    def postprocess_html(self, soup, first):
        body = soup.find('body')
        for name, val in body.attrs:
            del body[name]
        for table in list(self.eco_find_image_tables(soup)):
            caption = table.find('font')
            img = table.find('img')
            div = Tag(soup, 'div')
            div['style'] = 'text-align:left;font-size:70%'
            ns = NavigableString(self.tag_to_string(caption))
            div.insert(0, ns)
            div.insert(1, Tag(soup, 'br'))
            del img['width']
            del img['height']
            img.extract()
            div.insert(2, img)
            table.replaceWith(div)
        return soup
#2
Junior Member
Posts: 8
Karma: 10
Join Date: Oct 2011
Device: Kindle 3g
I hope this works for you.
To remove images, you need to add 'img' to the list of tags the recipe strips out (the remove_tags option). The full modified recipe is below; a minimal sketch of just that change follows after it. Code:
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
economist.com
'''
from calibre.web.feeds.news import BasicNewsRecipe
from calibre.ebooks.BeautifulSoup import Tag, NavigableString
from collections import OrderedDict

import time, re

class Economist(BasicNewsRecipe):

    title = 'The Economist'
    language = 'en'

    __author__ = "Kovid Goyal"
    INDEX = 'http://www.economist.com/printedition'
    description = ('Global news and current affairs from a European'
            ' perspective. Best downloaded on Friday mornings (GMT)')
    extra_css = '.headline {font-size: x-large;} \n h2 { font-size: small; } \n h1 { font-size: medium; }'
    oldest_article = 7.0
    remove_tags = [
            dict(name=['img', 'script', 'noscript', 'title', 'iframe',
                'cf_floatingcontent']),
            dict(attrs={'class':['dblClkTrk', 'ec-article-info',
                'share_inline_header', 'related-items']}),
            {'class': lambda x: x and 'share-links-header' in x},
    ]
    keep_only_tags = [dict(id='ec-article-body')]
    needs_subscription = False
    no_stylesheets = True
    preprocess_regexps = [(re.compile('</html>.*', re.DOTALL),
        lambda x:'</html>')]

    # economist.com has started throttling after about 60% of the total has
    # downloaded with connection reset by peer (104) errors.
    delay = 1

    def get_cover_url(self):
        br = self.browser
        br.open(self.INDEX)
        issue = br.geturl().split('/')[4]
        self.log('Fetching cover for issue: %s'%issue)
        cover_url = "http://media.economist.com/sites/default/files/imagecache/print-cover-full/print-covers/%s_CNA400.jpg" %(issue.translate(None,'-'))
        return cover_url

    def parse_index(self):
        try:
            return self.economist_parse_index()
        except:
            raise
            self.log.warn(
                'Initial attempt to parse index failed, retrying in 30 seconds')
            time.sleep(30)
            return self.economist_parse_index()

    def economist_parse_index(self):
        soup = self.index_to_soup(self.INDEX)
        div = soup.find('div', attrs={'class':'issue-image'})
        if div is not None:
            img = div.find('img', src=True)
            if img is not None:
                self.cover_url = img['src']
        feeds = OrderedDict()
        for section in soup.findAll(attrs={'class':lambda x: x and 'section' in x}):
            h4 = section.find('h4')
            if h4 is None:
                continue
            section_title = self.tag_to_string(h4).strip()
            if not section_title:
                continue
            self.log('Found section: %s'%section_title)
            articles = []
            subsection = ''
            for node in section.findAll(attrs={'class':'article'}):
                subsec = node.findPreviousSibling('h5')
                if subsec is not None:
                    subsection = self.tag_to_string(subsec)
                prefix = (subsection+': ') if subsection else ''
                a = node.find('a', href=True)
                if a is not None:
                    url = a['href']
                    if url.startswith('/'):
                        url = 'http://www.economist.com'+url
                    url += '/print'
                    title = self.tag_to_string(a)
                    if title:
                        title = prefix + title
                        self.log('\tFound article:', title)
                        articles.append({'title':title, 'url':url,
                            'description':'', 'date':''})
            if articles:
                if section_title not in feeds:
                    feeds[section_title] = []
                feeds[section_title] += articles

        ans = [(key, val) for key, val in feeds.iteritems()]
        if not ans:
            raise Exception('Could not find any articles, either the '
                    'economist.com server is having trouble and you should '
                    'try later or the website format has changed and the '
                    'recipe needs to be updated.')
        return ans

    def eco_find_image_tables(self, soup):
        for x in soup.findAll('table', align=['right', 'center']):
            if len(x.findAll('font')) in (1,2) and len(x.findAll('img')) == 1:
                yield x

    def postprocess_html(self, soup, first):
        body = soup.find('body')
        for name, val in body.attrs:
            del body[name]
        for table in list(self.eco_find_image_tables(soup)):
            caption = table.find('font')
            img = table.find('img')
            div = Tag(soup, 'div')
            div['style'] = 'text-align:left;font-size:70%'
            ns = NavigableString(self.tag_to_string(caption))
            div.insert(0, ns)
            div.insert(1, Tag(soup, 'br'))
            del img['width']
            del img['height']
            img.extract()
            div.insert(2, img)
            table.replaceWith(div)
        return soup
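In case it is hard to spot the change in such a long recipe: the only functional edits are adding 'img' to the first remove_tags entry, which tells calibre to strip every <img> element while it cleans up each article (so the pictures are left out of the final book and should not be fetched at all), and adding 'related-items' to the class list to drop the related-articles box. The same trick works in any recipe. Here is a minimal sketch of the idea; the recipe name and feed URL are placeholders I made up, not a real news source. Code:

from calibre.web.feeds.news import BasicNewsRecipe

class NoImagesExample(BasicNewsRecipe):
    # Illustration only: point 'feeds' at a real RSS feed to use this.
    title          = 'No Images Example'
    oldest_article = 7
    no_stylesheets = True

    # Listing 'img' in remove_tags makes calibre delete every <img>
    # element from the downloaded articles, so no pictures end up in
    # the e-book and the file stays small.
    remove_tags = [dict(name='img')]

    # Placeholder (title, URL) feed pairs; substitute your own.
    feeds = [('Example feed', 'http://example.com/rss.xml')]

To use the modified Economist recipe, go to Fetch news > Add a custom news source in calibre, switch to advanced mode, paste the code there and save; it should then show up under "Custom" when you schedule a news download.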