import requests
from bs4 import BeautifulSoup as Bs
from  requests.exceptions import ReadTimeout,ConnectionError,RequestException
import time, xlwt, xlrd
from xlutils.copy import copy
list_sum=[]

def parse_html(html_doc):
    """Extract collection link paths from the recommendations listing page.

    Parameters
    ----------
    html_doc : str
        Raw HTML of the jianshu collections page.

    Returns
    -------
    list[str]
        The ``href`` attribute of each collection's anchor tag.
    """
    list_href = []
    soup = Bs(html_doc, 'html.parser')
    # Each collection card sits inside a div.col-xs-8 wrapper.
    for tag in soup.find_all('div', class_='col-xs-8'):
        # BUG FIX: the original assumed the inner wrapper and anchor always
        # exist; a missing tag raised AttributeError and aborted the whole
        # parse.  Skip malformed cards instead.
        wrap = tag.find('div', class_='collection-wrap')
        if wrap is None:
            continue
        link = wrap.find('a')
        if link is None or not link.has_attr('href'):
            continue
        list_href.append(link['href'])
    return list_href


def parse_htmls(html_doc):
    """Collect the abstract of every article on a collection page.

    All abstracts found on the page are gathered into one list, which is
    appended to the module-level ``list_sum`` accumulator (one sub-list
    per page parsed).

    Parameters
    ----------
    html_doc : str
        Raw HTML of a collection's article listing page.
    """
    soup = Bs(html_doc, 'html.parser')
    container = soup.find(id='list-container')
    if container is None:
        # Page layout changed or an error page was returned; nothing to parse.
        return
    find_ul = container.find('ul')
    # BUG FIX: the original re-created ``list1`` on every loop iteration and
    # only appended it to ``list_sum`` after the loop ended, so every
    # abstract except the LAST one on the page was silently discarded.
    page_abstracts = []
    for tag in find_ul.find_all('li'):
        abstract = tag.find('p', class_='abstract')
        if abstract is None:
            continue
        print(abstract.string)
        page_abstracts.append(abstract.string)
    list_sum.append(page_abstracts)
    print(list_sum)

def reques_url(url_string):
    """Fetch *url_string* and return its body decoded as UTF-8.

    Returns ``None`` (implicitly) on a non-200 status or on any request
    failure; errors are reported to stdout rather than raised.

    Parameters
    ----------
    url_string : str
        Full URL to fetch.  NOTE(review): the Host header is hard-coded to
        www.jianshu.com, so this helper only works for that site.

    Returns
    -------
    str or None
        The decoded response body, or ``None`` on any failure.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
        'Host': 'www.jianshu.com'
    }
    try:
        # BUG FIX: without a timeout, requests can block forever on a hung
        # server, and the ReadTimeout handler below could never fire.
        with requests.get(url_string, headers=header, timeout=10) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            else:
                print('status:', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connect Error')
    except RequestException:
        print('Request Error')


def get_html_datas(list_href, num):
    """Fetch and parse one results page for each collection path.

    Parameters
    ----------
    list_href : list[str]
        Collection paths (e.g. ``/c/xxxx``) relative to www.jianshu.com.
    num : str
        Page number to request, appended to the query string.
    """
    print(list_href)
    for href in list_href:
        url_string = ('https://www.jianshu.com' + href
                      + '?order_by=commented_at&page=' + num)
        # BUG FIX: the original named this local ``requests``, shadowing the
        # imported requests module for the rest of the function body.
        html_doc = reques_url(url_string)
        if html_doc:
            parse_htmls(html_doc)
        else:
            print('No datas')


def get_html_data():
    """Fetch the jianshu collections recommendation page.

    Returns
    -------
    str or None
        The page HTML, or ``None`` when the request failed (a message is
        printed in that case).
    """
    url_string = ('https://www.jianshu.com/recommendations/collections'
                  '?utm_medium=index-collections&utm_source=desktop')
    # BUG FIX: the original named this local ``requests``, shadowing the
    # imported requests module inside the function.
    html_doc = reques_url(url_string)
    if html_doc:
        return html_doc
    print('No datas')


def main():
    """Scrape pages 1-2 of every recommended collection and write the
    collected abstracts to ``jianshu.xls``."""
    print(list_sum)
    for page in range(1, 3):
        hrefs = parse_html(get_html_data())
        get_html_datas(hrefs, str(page))
        time.sleep(5)  # pause between pages to avoid hammering the server
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('信息表')
    # Data rows start at index 1, leaving row 0 blank (same layout as before).
    for row, record in enumerate(list_sum, start=1):
        for col, value in enumerate(record):
            sheet.write(row, col, value)
    workbook.save('jianshu.xls')


if __name__ == '__main__':
    main()
