from bs4 import BeautifulSoup, Comment
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import re
from lxml import etree
import time


def filter_tags(html_str):
    """Strip scripts, styles, comments and markup from raw HTML.

    Args:
        html_str: the raw HTML document as a string.

    Returns:
        A ``(title, content)`` tuple where ``title`` is the page title
        (empty string when the document has none) and ``content`` is a
        list of tag-free text lines (the prettified text split on '\\n').
    """
    soup = BeautifulSoup(html_str, "lxml")
    # A document without a <title> raises AttributeError on
    # ``soup.title.string``; fall back to '' instead of dying.
    # (The original ``.encode().decode('utf-8')`` was a no-op on str
    # and has been dropped.)
    try:
        title = soup.title.string
    except AttributeError:
        title = ''
    if title is None:
        # <title> exists but has no single string child.
        title = ''
    # Remove non-content nodes before extracting the visible text.
    for tag in soup.find_all(['script', 'style']):
        tag.extract()
    for comment in soup.find_all(text=lambda t: isinstance(t, Comment)):
        comment.extract()
    # Drop any remaining markup and keep only the text, line by line.
    tag_re = re.compile(r"<[^>]*>")
    content = tag_re.sub('', soup.prettify()).split('\n')
    return title, content


def getcontent(lst, title, authorset):
    """Heuristically extract the article body from a list of text lines.

    The longest line is assumed to sit inside the article body.  The
    start of the body is the first long line (> 50 chars) followed by
    three non-trivial lines; the end is the first short line after the
    longest one followed by three more short lines.

    Args:
        lst: list of text lines (e.g. from ``filter_tags``).
        title: unused; kept for interface compatibility.
        authorset: unused; kept for interface compatibility.

    Returns:
        The stripped, non-empty lines between the detected start and
        end; ``[]`` for empty input.
    """
    lstlen = [len(x) for x in lst]
    if not lstlen:
        return []  # nothing to extract from an empty page
    threshold = 50
    n = len(lstlen)
    maxindex = lstlen.index(max(lstlen))
    startindex = 0
    endindex = 0
    # Start scan runs only well before the longest line.  The
    # ``max(..., 0)`` guard prevents the negative slice the original
    # produced when maxindex < 3 (which scanned the tail by accident
    # and could raise IndexError at lstlen[i + 3]).
    for i, v in enumerate(lstlen[:max(maxindex - 3, 0)]):
        if v > threshold and lstlen[i + 1] > 5 and lstlen[i + 2] > 5 and lstlen[i + 3] > 5:
            startindex = i
            break
    # End scan: stop before the lookahead runs off the end of the list
    # (the original indexing raised IndexError near the tail).
    for i, v in enumerate(lstlen[maxindex:]):
        j = maxindex + i
        if j + 3 >= n:
            break
        if v < threshold and lstlen[j + 1] < 10 and lstlen[j + 2] < 10 and lstlen[j + 3] < 10:
            endindex = i
            break
    content = [x.strip() for x in lst[startindex:endindex + maxindex] if len(x.strip()) > 0]
    return content


def getcontentfromweb(url, wait=10):
    """Render *url* in Chrome via Selenium and return the page source.

    Args:
        url: the page to load.
        wait: seconds to sleep after navigation so JavaScript-generated
            content has a chance to load (default 10, as before).

    Returns:
        The rendered HTML.  It is also dumped to ``test.html`` for
        debugging.
    """
    chrome_options = Options()
    # chrome_options.add_argument('--headless')
    # prefs = {"profile.managed_default_content_settings.images": 2}
    # chrome_options.add_experimental_option("prefs", prefs)
    # ``chrome_options=`` is deprecated (removed in Selenium 4);
    # ``options=`` is the supported keyword.
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(url)
        time.sleep(wait)  # crude wait; WebDriverWait would be more robust
        html = driver.page_source
    finally:
        # quit() shuts down the whole browser and the chromedriver
        # process (close() only closes the window), so nothing leaks
        # even when driver.get raises.
        driver.quit()
    with open('test.html', 'w', encoding='utf-8') as f:
        f.write(html)
    return html


def get_title(html):
    """Best-effort page title.

    Tries, in order: text under any element whose class contains
    "title", then <h1> text, then <h2> text.  Returns the joined text
    of the first XPath that matches anything, or '' if none do.
    """
    tree = etree.HTML(html)
    candidates = (
        '//*[contains(@class,"title")]//text()',
        '//h1/text()',
        '//h2/text()',
    )
    pieces = []
    for xpath in candidates:
        pieces = tree.xpath(xpath)
        if pieces:
            break
    return ''.join(pieces)


def get_content(html, url):
    """Extract ``(title, body_text)`` from *html*, re-fetching with a
    real browser when the static page yields too little text.

    Args:
        html: the statically fetched HTML.
        url: the page URL, used only for the Selenium re-fetch.

    Returns:
        A ``(title, body_text)`` tuple; ``body_text`` is stripped.
    """
    authorset = {'责任编辑', '作者'}
    title, content = filter_tags(html)
    newcontent = getcontent(content, title, authorset)
    ctt = ''.join(newcontent).strip()
    if len(ctt) < 150:
        # Too little text: the body is probably rendered client-side,
        # so fetch again through Selenium.  getcontentfromweb already
        # dumps the rendered HTML to test.html, so the duplicate write
        # that used to be here has been removed.
        html = getcontentfromweb(url)
        title, content = filter_tags(html)
        newcontent = getcontent(content, title, authorset)
        # Strip consistently with the first pass (was missing before).
        ctt = ''.join(newcontent).strip()
    return title, ctt
