import requests
from requests.exceptions import ReadTimeout, ConnectionError, RequestException
from bs4 import BeautifulSoup as Bs
import time


# 2.1
def parse_html(html_doc):
    """Extract and print article titles and like counts from a listing page.

    For each <li> under the ``#list-container`` <ul>, prints the title
    (<a class="title"> text) and the text node that follows the like icon
    (<i class="iconfont ic-list-like">).

    :param html_doc: HTML document of a Jianshu collection listing page.
    :return: None (results are printed).
    """
    # 2.1.1 Build the BeautifulSoup object
    soup = Bs(html_doc, 'html.parser')
    container = soup.find(id='list-container')
    if container is None:
        # Error page or changed layout: the original code raised
        # AttributeError here; bail out gracefully instead.
        print('Stats: list-container not found')
        return
    item_list = container.find('ul')
    if item_list is None:
        print('Stats: item list not found')
        return
    for tag in item_list.find_all('li'):
        find_a = tag.find('a', class_='title')
        if find_a is not None:
            print(find_a.string)
        find_like = tag.find('i', class_='iconfont ic-list-like')
        # The like count is the text node immediately after the icon.
        if find_like is not None and find_like.next_sibling is not None:
            print(find_like.next_sibling.string)





# 1.3 Fetch data
def request_url(url_string, timeout=10):
    """GET *url_string* and return the decoded body, or None on failure.

    :param url_string: full URL to request.
    :param timeout: seconds before aborting the request. The original code
        caught ``ReadTimeout`` but never set a timeout, so a stalled server
        could hang the script forever; 10s is a backward-compatible default.
    :return: response body decoded as UTF-8, or None on non-200 status or
        any request error (errors are printed, not raised).
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)',
        'Host': 'www.jianshu.com'
    }

    try:
        with requests.get(url_string, headers=header, timeout=timeout) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            print('Stats:', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connection Error')
    except RequestException:
        # Base class of the above; catches anything else from requests.
        print('Request Error')
    return None


# 1.2
def get_html_data(page):
    """Fetch one page of the Jianshu collection listing.

    :param page: page number; int or str are both accepted (the original
        required a pre-stringified value and broke on ints).
    :return: HTML text of the page, or None if the request failed.
    """
    url_string = ('https://www.jianshu.com/c/NEt52a'
                  '?order_by=commented_at&page=' + str(page))
    results = request_url(url_string)
    if results:
        # print(results)
        return results
    print('No datas')
    return None


def main():
    """Crawl the listing pages, parse each one, and pause between requests."""
    for i in range(1, 2):
        # 1.1 Request the page
        html_doc = get_html_data(str(i))
        # 2.1 Clean / parse the data. Skip pages that failed to download:
        # the original passed None straight to parse_html, which crashed
        # inside BeautifulSoup.
        if html_doc:
            parse_html(html_doc)
        # Throttle so we do not hammer the server.
        time.sleep(10)

    # 3.1 Save the data (not implemented yet)


# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()


# Scratch notes left over from development (User-Agent value and the URLs
# used for manual testing). A bare string literal like this is a no-op at
# runtime; kept for reference.
'''
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)
https://www.jianshu.com/c/NEt52a?order_by=commented_at&_pjax=%23list-container
https://www.jianshu.com/c/NEt52a?order_by=commented_at&page=1

'''