import requests
from lxml import etree
import re
import pymongo # PyMongo 驱动
# Browser-like User-Agent so the target site serves regular desktop pages.
# (Note: the implicit string concatenation has no space before "AppleWebKit";
# kept as-is to preserve the exact header value.)
headers = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/78.0.3904.108 Safari/537.36'),
}


# --- MongoDB connection (runs at import time; requires a local mongod) ---
myclient = pymongo.MongoClient('localhost', 27017)		# 27017 is MongoDB's default port
mydb = myclient["test"]           # select (lazily create) the "test" database
dblist = myclient.list_database_names()
if "test" in dblist:		# report whether the database already exists
    print("该数据库存在")
mycol = mydb['qiongyou1']  # collection the scraper inserts documents into

collist = mydb.list_collection_names()
if "qiongyou1" in collist:	# report whether the collection already exists
    print("该集合存在")

def get_link(url):  # collect the detail-page links from one listing page
    """Scrape one search-result page and return absolute detail-page URLs.

    Args:
        url: URL of a qyer.com plan search-result page.

    Returns:
        list[str]: detail-page URLs with scheme-relative "//" prefixes
        rewritten to "http://". Also prints each raw href as it is found.
    """
    # BUG FIX: requests.get's second positional argument is `params`, so the
    # original call sent the UA dict as a query string instead of as request
    # headers. Pass it through the `headers=` keyword.
    res = requests.get(url, headers=headers)
    selector = etree.HTML(res.text)  # parse the response body into an element tree
    href = selector.xpath('/html/body/div[5]/div[2]/div/div/a[1]/@href')
    links = []
    for j in href:
        # hrefs are scheme-relative ("//plan.qyer.com/..."); make them absolute
        links.append(re.sub('^//', 'http://', str(j)))
        print(j)
    return links


def get_link1(link):  # collect the second-level (calendar/date view) links
    """Scrape a detail page and return absolute calendar-view URLs.

    Args:
        link: URL of a plan detail page (as returned by get_link).

    Returns:
        list[str]: calendar links with the relative "/calendar" prefix
        expanded to "http://plan.qyer.com/calendar".
    """
    # BUG FIX: requests.get's second positional argument is `params`; the
    # headers dict must be passed via the `headers=` keyword to actually be
    # sent as request headers.
    res = requests.get(link, headers=headers)
    selector = etree.HTML(res.text)  # parse the response body into an element tree
    href = selector.xpath('/html/body/nav[1]/div/ul/li[2]/a/@href')
    link1s = []
    for j in href:
        # site-relative links ("/calendar/...") -> absolute URLs
        link1s.append(re.sub('^/calendar', 'http://plan.qyer.com/calendar', str(j)))
    return link1s
# NOTE(review): the block below is dead code disabled by wrapping it in a
# module-level string literal (it is evaluated and discarded at import). It
# scraped a plan's detail table into MongoDB. If re-enabled, note that (a) it
# repeats the requests.get(href, headers) params-vs-headers bug, and (b) the
# xpath() calls return lists that are formatted directly into the stored
# strings — both should be fixed first.
'''
def page_scratch(href):  # 抓取公示详细页面信息
    res = requests.get(href, headers)	  #GET请求
    res.encoding = res.apparent_encoding  # 更改可能的编码方式也可以直接用"GB2312"
    selector = etree.HTML(res.text)		  # 利用 etree.HTML 把字符串解析成 HTML 文件
    # 日期
    date1 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[2]/div/span/text()')
    date2 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[2]/div/em/text()')
    date3 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[2]/div/strong/text()')
    date = '{} {} {}'.format(date1, date2, date3)

    # 城市
    city1 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[3]/div/ul/li/span[1]/a/text()')  # 中文
    city2 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[3]/div/ul/li/span[2]/a/text()')  # 英语
    # city=city1+city2
    city = '{} {}'.format(city1, city2)

    # 交通
    traffic1 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[3]/td[4]/div/div/p/text()[1]')
    traffic2 = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[3]/td[4]/div/div/p/text()[2]')
    traffic='{}---{}'.format(traffic1,traffic2)
    # 景点
    scane = selector.xpath('/html/body/div[2]/div[2]/table/tbody/tr[1]/td[5]/div/ul/li/span/a/text()')

    mycol.insert_one({'日期': date, '城市': city, '交通': traffic, '景点': scane})
'''
if __name__ == '__main__':
    # Entry point: scrape page 1 of the plan search results, then print the
    # calendar-view links found on each detail page.
    start_url = 'http://plan.qyer.com/search_0_0_11_0_0_0_1/'
    for detail_link in get_link(start_url):
        calendar_links = get_link1(detail_link)
        print(calendar_links)
       # page_scratch(calendar_links)


