import requests
import time
import xlwt
from requests.exceptions import ReadTimeout, ConnectionError, RequestException
from bs4 import BeautifulSoup as Bs

# 解析这个  html 格式 并爬出来每一个小网页的内容
def parse_html(html_doc):
    soup = Bs(html_doc, 'html.parser')
    find_id = soup.find(id='list-container')
    # print(find_id.name)
    find_ul = find_id.find('ul')
    find_lis = find_ul.find_all('li')
    # find_all下面跟着FOR 循环
    # print(find_lis)
    li = []
    for tag in find_lis:
        find_a = tag.find('a', class_='title')
        find_p = tag.find('p',class_='abstract')
        li.append(find_a.string)
        li.append(find_p.string)
        # print(find_a.string)
        # print(find_p.string)
    return li

# 1.3 获取数据
# Fetch a page and return its body as text.
def request_url(url_string, timeout=10):
    """Fetch a URL and return its body decoded as UTF-8.

    Args:
        url_string: full URL to fetch; expected to be on www.jianshu.com
            since the Host header is pinned to that domain.
        timeout: seconds before the request is aborted. The original code
            caught ReadTimeout but never set a timeout, so requests.get
            could block forever and that handler was unreachable.

    Returns:
        The decoded response body on HTTP 200, otherwise None (a status
        line or error message is printed instead).
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36',
        'Host': 'www.jianshu.com'
    }
    try:
        response = requests.get(url_string, headers=header, timeout=timeout)
        if response.status_code == 200:
            return response.content.decode('utf-8')
        print('Stats', response.status_code)
    except ReadTimeout:
        print('Time out')
    except ConnectionError:
        print('connection Error')
    except RequestException:
        print('RequestException')
    # Explicit None on any failure path so callers can test the result.
    return None

def gain_url():
    """Scrape the jianshu recommended-collections index page.

    Returns:
        A list of (collection_name, paged_url_prefix) tuples, where each
        URL ends with '?order_by=commented_at&page=' so a page number can
        be appended directly.
    """
    url_string = r'https://www.jianshu.com/recommendations/collections?utm_medium=index-collections&utm_source=desktop'
    # First page: the index of all recommended collections.
    result = request_url(url_string)

    soup = Bs(result, 'html.parser')

    name = []
    href = []
    # One pass over the column cells collects both the link and the title.
    for cell in soup.find_all(class_='col-xs-8'):
        for item in cell.find_all('a', target='_blank'):
            # The <a> tag's href holds the relative collection path.
            her = 'https://www.jianshu.com'+item.attrs['href']+'?order_by=commented_at&page='
            href.append(her)
        for heading in cell.find_all('h4'):
            # The <h4> text is the collection's display name.
            name.append(heading.string)
    # BUG FIX: the original deduplicated with set(href), which destroys
    # ordering and pairs names with the wrong URLs nondeterministically.
    # dict.fromkeys removes duplicates while preserving first-seen order.
    unique_href = list(dict.fromkeys(href))
    return list(zip(name, unique_href))


# 1.2
def get_html_data(http, pag):
    """Download one listing page.

    Args:
        http: paged URL prefix ending in '...&page='.
        pag: page number as a string, appended to the prefix.

    Returns:
        The page HTML as a str, or None (after printing 'No datas')
        when the fetch produced nothing.
    """
    page_url = http + pag
    html = request_url(page_url)
    if not html:
        print('No datas')
        return None
    return html


def main():
    """Crawl pages 1-10 of every recommended collection.

    Returns:
        A list of rows: a one-element [collection_name] row for each
        collection, followed by one [title, abstract, ...] row per page
        successfully fetched.
    """
    info_all_list = []
    for collection_name, base_url in gain_url():
        info_all_list.append([collection_name])
        for page in range(1, 11):
            html_doc = get_html_data(base_url, str(page))
            # BUG FIX: a failed fetch returns None and the original passed
            # it straight to parse_html, crashing the whole crawl. Skip
            # the page instead.
            if not html_doc:
                continue
            info_all_list.append(parse_html(html_doc))
            # Throttle between requests to be polite to the server.
            time.sleep(1)
    return info_all_list


if __name__ == '__main__':
    # Crawl everything, then write each collected row into one spreadsheet
    # row of an .xls workbook.
    info_all_list = main()
    new_workbook = xlwt.Workbook()
    new_content = new_workbook.add_sheet('基本信息')
    # enumerate over rows/cells instead of range(len(...)) indexing.
    for row_idx, row_values in enumerate(info_all_list):
        for col_idx, value in enumerate(row_values):
            new_content.write(row_idx, col_idx, value)
    new_workbook.save('简书.xls')

