#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 25 16:43:44 2022

@author: cythnia
"""
##爬虫第4课：多页爬取文章信息
#————————————————————————————————————#
#导入工具包
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
import numpy as np
# Target listing page: the health-tech category on 199it.com.
url='http://www.199it.com/archives/category/emerging/health-tech'
# Browser-like User-Agent so the server does not reject the scraper.
headers={
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X -1_0_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    }
# Fetch the page; fail fast on HTTP errors instead of parsing an error page.
html=requests.get(url,headers=headers)
html.raise_for_status()
# Use the detected encoding so Chinese text decodes correctly
# (the server's declared charset is not always trustworthy).
html.encoding=html.apparent_encoding
data=html.text
# Parse the HTML with the lxml backend.
soup=BeautifulSoup(data,'lxml')
# Article titles: the id part of the selector was dropped because it
# restricted the match to a single node.
name=soup.select('div.entry-content > h2 > a')
for i in name:
    print(i.get_text())
# Publication time of each article.
shijian=soup.select(' div.entry-content > aside.meta-row.row-3 > div:nth-child(1) > ul > li.post-time > time')
for i in shijian:
    print(i.get_text())
# The same <a> tags carry the article links, so reuse the title selection
# instead of running the identical CSS query a second time.
lianjie=name
#==================================================
# Single-page scrape: pair each title with its time and link,
# then dump the table to Excel.
#==================================================
rows=[[title.get_text(),when.get_text(),link['href']]
      for title,when,link in zip(name,shijian,lianjie)]
result=pd.DataFrame(rows,columns=['名称','时间','链接'])
result.to_excel('/Users/cythnia/Desktop/单页爬取.xlsx',index=False)
#====================================================
# Multi-page scrape
#====================================================
# Comparing page addresses shows only the trailing /page/N changes:
#   http://www.199it.com/archives/category/emerging/health-tech
#   http://www.199it.com/archives/category/emerging/health-tech/page/2
#   http://www.199it.com/archives/category/emerging/health-tech/page/11
# Build the 11 page URLs up front (pages 1 through 11).
base='http://www.199it.com/archives/category/emerging/health-tech/page/{}'
url=[base.format(page) for page in range(1,12)]
# Accumulate one [title, time, link] row per article across all pages.
lis=[]
# Repeat the single-page scrape for each of the 11 listing pages.
for urli in url:
    htmli=requests.get(urli,headers=headers)
    # Fail fast on HTTP errors instead of parsing an error page.
    htmli.raise_for_status()
    # Use the detected encoding so Chinese text decodes correctly.
    htmli.encoding=htmli.apparent_encoding
    soupi=BeautifulSoup(htmli.text,'lxml')
    # Article titles (id selector dropped: it matched only one node).
    namei=soupi.select('div.entry-content > h2 > a')
    # Publication time of each article.
    shijiani=soupi.select(' div.entry-content > aside.meta-row.row-3 > div:nth-child(1) > ul > li.post-time > time')
    # The same <a> tags carry the links; reuse the title selection
    # instead of running the identical CSS query again.
    lianjiei=namei
    for title,when,link in zip(namei,shijiani,lianjiei):
        lis.append([title.get_text(),when.get_text(),link['href']])
    print('完成:',urli)
    # Random 5-9 second pause between requests to avoid hammering the server.
    time.sleep(np.random.randint(5,10))
resulti=pd.DataFrame(lis,columns=['名称','时间','链接'])
resulti.to_excel('/Users/cythnia/Desktop/多页爬取.xlsx',index=False)
    