from calibre.web.feeds.news import BasicNewsRecipe, classes
from calibre import browser
from collections import OrderedDict
import re

def absurl(url):
    if url.startswith('/'):
        url = 'https://hbr.org' + url
    return url

class HBR(BasicNewsRecipe):
    title = 'Harvard Business Review'
    __author__ = 'unkn0wn'
    description = (
        'Harvard Business Review is the leading destination for smart management thinking.'
        ' Through its flagship magazine, books, and digital content and tools published on HBR.org,'
        ' Harvard Business Review aims to provide professionals around the world with rigorous insights'
        ' and best practices to help lead themselves and their organizations more effectively and to make a positive impact.')
    language = 'en'
    use_embedded_content = False
    no_stylesheets = True
    remove_javascript = True
    masthead_url = 'https://hbr.org/resources/css/images/hbr_logo.svg'
    remove_attributes = ['height', 'width', 'style']
    encoding = 'utf-8'
    ignore_duplicate_articles = {'url'}
    resolve_internal_links = True
    
    extra_css = '''
        .article-summary, .article-ideainbrief, .description-text, .link--black {font-size:small; color:#202020;}
        .credits--hero-image, .credits--inline-image, .caption--inline-image {font-size:small; text-align:center;}
        .article-byline-list {font-size:small; font-weight:bold;}
        .question {font-weight:bold;}
        .right-rail--container {font-size:small; color:#404040;}
        .article-callout, .slug-content {color:#404040;}
        .article-sidebar {color:#202020;}
    '''

    keep_only_tags = [
        classes(
            'slug-container headline-container hero-image-content article-summary article-body '
            'standard-content article-dek-group article-dek'
        )
    ]

    remove_tags = [
        classes(
            'left-rail--container translate-message follow-topic newsletter-container'
        )
    ]

    def parse_index(self):
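        # The /magazine landing page links to the current issue's table of
        # contents (an /archive-toc/ URL); the issue date and cover image are
        # read from the same block before the TOC itself is parsed.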
        soup = self.index_to_soup('https://hbr.org/magazine')
        div = soup.find(**classes('backdrop-lightest'))
        a = div.find('a', href=lambda x: x and x.startswith('/archive-toc/'))
        index = absurl(a['href'])
        self.timefmt = ' [' + self.tag_to_string(div.find('h2')) + ']'
        self.log('Downloading issue: ', index, self.timefmt)
        cov = a.find('img', src=True)
        if cov:
            self.cover_url = absurl(cov['src'])
        soup = self.index_to_soup(index)

        feeds = OrderedDict()

        for h3 in soup.findAll('h3', attrs={'class': 'hed'}):
            articles = []
            a = h3.find('a')
            title = self.tag_to_string(a)
            url = absurl(a['href'])
            auth = ''
            div = h3.find_next_sibling('div', attrs={'class': 'stream-item-info'})
            if div:
                aut = self.tag_to_string(div).replace('Magazine Article ', '')
                # Author names are run together in the listing; insert a comma
                # and space before each capital that follows another word character.
                auth = re.sub(r'(?<=\w)([A-Z])', r', \1', aut)
            des = ''
            dek = h3.find_next_sibling('div', attrs={'class': 'dek'})
            if dek:
                des = self.tag_to_string(dek)
            desc = des + ' | ' + auth.title()
            section_title = 'Articles'
            li = h3.findParent('li')
            label = li.find_previous_sibling('div', **classes('stream-section-label')) if li else None
            sec = label.find('h4') if label else None
            if sec:
                section_title = self.tag_to_string(sec).title()
            self.log(section_title, '\n\t', title, '\n\t', desc, '\n\t\t', url)
            articles.append({'title': title, 'url': url, 'description': desc})
            if articles:
                feeds.setdefault(section_title, []).extend(articles)
        return list(feeds.items())

    def preprocess_html(self, soup):
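        # Normalize HBR's markup: strip link targets from slugs, flatten the
        # byline list into plain spans, and render summaries, callouts and
        # sidebars as blockquotes.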
        for slug in soup.findAll(**classes('slug-content')):
            del slug['href']
        for byline in soup.findAll(**classes('article-byline')):
            for by in byline.findAll('span', attrs={'class': 'by-prefix'}):
                by.extract()
            for li in byline.findAll('li'):
                li.name = 'span'
        for div in soup.findAll('div', attrs={'class':['article-summary', 'article-callout']}):
            div.name = 'blockquote'
        for sidebar in soup.findAll(**classes('article-sidebar article-ideainbrief')):
            sidebar.name = 'blockquote'
        return soup

    # HBR changes the content it delivers based on cookies, so the
    # following ensures that we send no cookies
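    # get_browser()/clone_browser() hand back this recipe object itself, and
    # every open()/open_novisit() call builds a brand-new mechanize browser,
    # so no cookie jar is ever shared between requests.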
    def get_browser(self, *args, **kwargs):
        return self

    def clone_browser(self, *args, **kwargs):
        return self.get_browser()

    def open_novisit(self, *args, **kwargs):
        br = browser(user_agent=calibre_most_common_ua)
        return br.open_novisit(*args, **kwargs)

    open = open_novisit


# User agent for the cookie-less browser built in open_novisit()
calibre_most_common_ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'