# -*- encoding: utf-8 -*-
"""
@File    :   get_all_content.py
@Time    :   2020/07/13 16:25:21
@Author  :   Song Zewen 
@Version :   1.0
@Contact :   stg1205@163.com
@License :   (C)Copyright 2020-2021, Liugroup-NLPR-CASIA
@Desc    :   Crawl article body content from CSDN and NetEase (163) news
"""


import getopt
import os
import sys
from collections import deque

from bs4 import BeautifulSoup

# NOTE(review): `import .crawler_util as cu` was a SyntaxError; since this file
# is executed as a script (see the __main__ guard), use an absolute import.
import crawler_util as cu


# CSS selectors used to harvest outgoing article links per supported site.
# Keys are the `website` identifiers accepted by save_all_content().
HREF_SELECTOR = {
    'csdn': 'h2 > a, h4 > a',
    'news163': 'h3 > a, h2 > a, li > a, h4 > a'
}
 

def save_content(content, count, website):
    """Write *content* to ./<website>/<count>.txt as UTF-8 text.

    Creates the ./<website>/ directory if it does not exist yet
    (the original version crashed with FileNotFoundError in that case).

    :param content: article body text to persist
    :param count: running index used as the file name
    :param website: site identifier, used as the output directory name
    """
    directory = './' + website
    os.makedirs(directory, exist_ok=True)
    file_path = directory + '/' + str(count) + '.txt'
    # The `with` block closes the file; the old explicit f.close() was redundant.
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)
    
def save_all_content(url, website):
    """Crawl pages reachable from *url* and save long article bodies to disk.

    Pages are fetched one at a time; every page's text is extracted with
    cu.content_extractor and saved (via save_content) when it exceeds 300
    characters. Outgoing links matching HREF_SELECTOR[website] are queued
    for crawling. Stops after 100001 saved articles or when no unvisited
    links remain.

    :param url: seed URL to start crawling from
    :param website: site identifier; must be a key of HREF_SELECTOR
    """
    url_queue = deque([url])
    # Seed the visited set with the whole URL string.
    # (The original `set(url)` split the URL into individual characters.)
    url_set = {url}

    count = 0

    # `while url_queue` fixes the original `len(url_queue) >= 0`, which was
    # always true and made pop() raise IndexError once the queue emptied.
    while count <= 100000 and url_queue:
        current = url_queue.pop()
        html = cu.get_html(current)
        if not html:
            continue

        soup = BeautifulSoup(html, 'html.parser')

        content = cu.content_extractor(soup, website)
        if content and len(content) > 300:
            save_content(content, count, website)
            count += 1

        for a in soup.select(HREF_SELECTOR[website]):
            href = a.get('href')
            # Skip anchors without an href (a.get returns None for those).
            if href and href not in url_set:
                url_set.add(href)
                url_queue.append(href)
    

if __name__ == '__main__':
    url, website = '', ''

    # Usage string now names this script (was "get_all_htmls.py").
    usage = 'get_all_content.py -u <url> -w <website>'
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'u:w:', ["url=", "website="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-u', '--url'):
            url = arg
        elif opt in ('-w', '--website'):
            website = arg

    # Both arguments are required, and the website must be one we know how
    # to parse — otherwise save_all_content would fail with a KeyError.
    # Examples:
    #   -u https://blog.csdn.net/     -w csdn
    #   -u https://news.163.com/world/ -w news163
    if not url or website not in HREF_SELECTOR:
        print(usage)
        sys.exit(2)

    save_all_content(url, website)
    