import requests
from requests.exceptions import ReadTimeout,ConnectionError,RequestException
from bs4 import BeautifulSoup as Bs
import time,xlwt,xlrd,os
from xlutils.copy import copy
# Resolve the output workbook location: <parent of this script's dir>/book.xls.
# abspath() fixes the case where __file__ is a bare filename (script run from
# its own directory), in which case dirname() returns '' and the original
# string concatenation produced the bogus path '/book.xls'.
p = os.path.dirname(os.path.abspath(__file__))
parent_path = os.path.dirname(p)
# os.path.join keeps the separator portable across operating systems.
path = os.path.join(parent_path, 'book.xls')
print(path)

def get_html_data():
    """Fetch the jianshu recommended-collections index page.

    Returns:
        The page HTML decoded as UTF-8, or None when the request failed
        (request_url already printed the failure reason).
    """
    url_string = r'https://www.jianshu.com/recommendations/collections?utm_medium=index-collections&utm_source=desktop'
    results = request_url(url_string)
    # Bug fix: the original tested `if requests:` — the imported module
    # object, which is always truthy — instead of the fetched result.
    if results:
        return results
    print('no datas')





def request_url(url_string, timeout=10):
    """GET *url_string* and return its body decoded as UTF-8.

    Args:
        url_string: Absolute URL to fetch.
        timeout: Seconds before the request aborts. The original code
            caught ReadTimeout but never passed a timeout — requests has
            no default, so a stalled server would hang forever. The
            default keeps existing callers working unchanged.

    Returns:
        The decoded body on HTTP 200, otherwise None (the HTTP status or
        the exception category is printed).
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
        'Host': 'www.jianshu.com'
    }
    try:
        with requests.get(url_string, headers=header, timeout=timeout) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            else:
                print('Stats', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connection Error')
    except RequestException:
        print('RequestError')



def parse_html(html_doc):
    """Extract each collection's link from the index page.

    Args:
        html_doc: HTML of the recommendations page; may be None when the
            download failed upstream.

    Returns:
        list[str]: href of the first <a> inside every
        div.collection-wrap under #list-container. Empty list when the
        input is missing or the expected container is absent (robustness
        fix: the original raised AttributeError in both cases).
    """
    if not html_doc:
        return []
    soup = Bs(html_doc, 'html.parser')
    container = soup.find(id='list-container')
    if container is None:
        return []  # page layout changed or the request was blocked
    return [wrap.find('a')['href']
            for wrap in container.find_all('div', class_='collection-wrap')]


def get_html_data1(web, page):
    """Fetch one result page of a collection, ordered by comment time.

    Args:
        web: Collection path such as '/c/xxxx' (leading slash included,
            as produced by parse_html).
        page: Page number, already converted to str by the caller.

    Returns:
        The page HTML, or None on failure.
    """
    url_string = r'https://www.jianshu.com' + web + '?order_by=commented_at&page=' + page
    results = request_url(url_string)
    # Bug fix: the original tested the always-truthy `requests` module
    # instead of the fetched result.
    if results:
        return results
    print('no datas')


def parse_row(get_html_data1, list_all):
    """Collect the collection title plus every article abstract on a page.

    Args:
        get_html_data1: Page HTML string (NOTE: the parameter name shadows
            the fetch function of the same name in this file; kept
            unchanged for call compatibility).
        list_all: List the abstracts are appended to (mutated in place).

    Returns:
        list: [collection title] followed by the collected abstracts.
    """
    soup = Bs(get_html_data1, 'html.parser')
    find_title = soup.find('a', class_='name')
    find_id = soup.find(id='list-container')
    find_ul = find_id.find('ul')
    find_lis = find_ul.find_all('li')
    for tag in find_lis:
        find_p = tag.find('p', class_='abstract')
        # Robustness fix: the original called find_p.string.strip() —
        # .string is None when the abstract contains nested tags (and
        # find_p is None when an item has no abstract), which raised
        # AttributeError. get_text() flattens nested markup safely.
        if find_p is not None:
            list_all.append(find_p.get_text().strip())
    return [find_title.string] + list_all

def add_data(slist):
    """Append *slist* as one new row at the bottom of the workbook.

    xlwt cannot modify an existing file in place, so the workbook at the
    module-level `path` is opened with xlrd, cloned with xlutils.copy,
    written to, and saved back over the original file.

    Args:
        slist: Cell values for the new row, written left to right.
    """
    add_wt = xlrd.open_workbook(path)
    row = add_wt.sheets()[0].nrows  # first free row index
    new_add_wt = copy(add_wt)
    sheet = new_add_wt.get_sheet(0)
    # enumerate replaces the original hand-maintained column counter.
    for col, value in enumerate(slist):
        sheet.write(row, col, value)
    new_add_wt.save(path)


def main():
    """Crawl every recommended collection (pages 1-2) and store the
    abstracts of each page as one row in the workbook."""
    index_html = get_html_data()
    for collection in parse_html(index_html):
        for page_no in (1, 2):
            page_html = get_html_data1(collection, str(page_no))
            row = parse_row(page_html, [])
            print('+++++')
            add_data(row)
            time.sleep(5)  # throttle so the site is not hammered




# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
