import requests
from requests.exceptions import ReadTimeout,ConnectionError,RequestException
from bs4 import BeautifulSoup as Bs
import time
import re
import xlwt 
from xlutils.copy import copy


# Collection titles gathered by parse_html (one entry per recommended collection).
comme = []

# Per-page lists of article titles gathered in main (10 pages per collection).
subject = []


def parse_html(html_doc):
    """Extract collection links from the recommendations page.

    Walks ``#list-container`` inside <body>, and for every
    ``div.col-xs-8`` column takes the <a> inside ``div.collection-wrap``.
    As a side effect, each collection's <h4> title is printed and
    appended to the module-level ``comme`` list.

    :param html_doc: HTML of the recommendations page.
    :return: list of collection hrefs, in page order.
    """
    hrefs = []
    soup = Bs(html_doc, 'html.parser')
    container = soup.find('body').find(id='list-container')
    for column in container.find_all('div', class_='col-xs-8'):
        anchor = column.find('div', class_='collection-wrap').find('a')
        hrefs.append(anchor['href'])
        title = anchor.find('h4').string
        print(title)
        comme.append(title)
    return hrefs


def request_url(url_string, timeout=10):
    """Fetch *url_string* and return its body decoded as UTF-8.

    Returns ``None`` on a non-OK status or on any request failure; the
    error is printed rather than raised, so callers just see ``None``.

    :param url_string: absolute URL to fetch.
    :param timeout: seconds to wait for connect/read. The original code
        caught ``ReadTimeout`` but never set a timeout, so that handler
        was unreachable and a stalled server would hang the crawl forever.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3514.0 Safari/537.36',
        'Host': 'www.jianshu.com',
        'Cookie': '_m7e_session=e343f4649f8897a6eacc0ad0b93f89b1; read_mode=day; default_font=font2; locale=zh-CN; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216560c92be0a2-0de87af96ec5d5-18187150-1049088-16560c92be2234%22%2C%22%24device_id%22%3A%2216560c92be0a2-0de87af96ec5d5-18187150-1049088-16560c92be2234%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_utm_source%22%3A%22desktop%22%2C%22%24latest_utm_medium%22%3A%22index-collections%22%7D%7D; Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1534996024,1535016466,1535017018,1535017425; signin_redirect=https%3A%2F%2Fwww.jianshu.com%2Frecommendations%2Fcollections%3Futm_medium%3Dindex-collections%26utm_source%3Ddesktop; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1535017606'
    }
    try:
        # Response is a context manager: releases the connection on exit.
        with requests.get(url_string, headers=header, timeout=timeout) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            else:
                print('Stats:', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connection Error')
    except RequestException:
        print('Request Error')


def get_html_data(url_string = '/recommendations/collections?utm_medium=index-collections&utm_source=desktop'):
    """Fetch a jianshu.com path and return its HTML.

    :param url_string: site-relative path (query string included);
        defaults to the recommended-collections listing.
    :return: page HTML on success, otherwise ``None`` after printing
        a notice.
    """
    full_url = r'https://www.jianshu.com' + url_string
    html = request_url(full_url)
    if not html:
        print('No datas')
        return None
    return html


def parse_revi(html_d):
    """Parse one collection page and return its article titles.

    Looks up the <ul> inside ``#list-container`` and reads the
    ``a.title`` link of every <li>.

    :param html_d: HTML of a single collection page.
    :return: list of article-title strings (an entry may be ``None``
        when a title has nested markup, matching ``.string`` semantics).
    """
    titles = []
    soup = Bs(html_d, 'html.parser')
    find_ul = soup.find(id='list-container').find('ul')
    for item in find_ul.find_all('li'):
        find_a = item.find('a', class_='title')
        # Some <li> entries (ads / placeholders) have no title link;
        # skip them instead of crashing the whole crawl on AttributeError.
        if find_a is None:
            continue
        titles.append(find_a.string)
    print(titles)
    return titles


def saveExcel(title, data):
    """Write a header row plus data rows to ``person.xls`` via xlwt.

    :param title: column headers, written to sheet row 0.
    :param data: sequence of rows; row m of *data* lands on sheet
        row m+1, cell by cell.
    """
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('人员信息表')
    for col, heading in enumerate(title):
        sheet.write(0, col, heading)
    for row_num, row in enumerate(data, start=1):
        for col_num, cell in enumerate(row):
            sheet.write(row_num, col_num, cell)
    workbook.save('person.xls')

def qqq(comme, subject):
    """Tag each collection's first page with that collection's title.

    Ten pages are fetched per collection, so collection *i*'s first
    page sits at ``subject[10*i]``; ``comme[i]`` is appended to that
    sublist in place.

    :param comme: collection titles, one per collection.
    :param subject: per-page title lists (10 per collection), mutated
        in place.
    :return: the same (mutated) *subject* list.
    """
    for idx, title in enumerate(comme):
        subject[10 * idx].append(title)
    return subject

def main(comme, subject):
    """Crawl the recommended collections and export their article titles.

    Fetches the recommendations page, then for every collection link
    fetches pages 1-10 (ordered by added_at) and accumulates each
    page's article titles into *subject*. Each collection's first page
    is then tagged with its title via ``qqq`` and everything is written
    to Excel, using the first accumulated list as the header row.
    """
    listing_html = get_html_data()
    for link in parse_html(listing_html):
        for page in range(1, 11):
            page_html = get_html_data(link + '?order_by=added_at&page=' + str(page))
            subject.append(parse_revi(page_html))
        time.sleep(1)  # pause between collections to go easy on the server
    subject = qqq(comme, subject)
    saveExcel(subject[0], subject[1:])

# Script entry point: runs the crawl using the module-level accumulators.
if __name__ == '__main__':
    main(comme, subject)
